Rename for_each_cpu() to for_each_possible_cpu(), to be more precise in naming, and also to match Linux.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
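
Purely as an illustration of why the conversion is mechanical: the old and the new spelling expand to the same loop over cpu_possible_map, so every call site changes in name only. The sketch below uses made-up stand-ins (NR_CPUS, a flat bitmap, a simplified for_each_cpu_mask()) rather than Xen's real cpumask implementation; the actual wrapper definitions are in the cpumask.h hunk near the end of this patch.

    #include <stdio.h>

    /* Made-up stand-ins for illustration; Xen's real NR_CPUS,
     * cpu_possible_map and for_each_cpu_mask() live in
     * xen/include/xen/cpumask.h. */
    #define NR_CPUS 4
    static const unsigned long cpu_possible_map = (1UL << NR_CPUS) - 1;

    #define for_each_cpu_mask(cpu, mask) \
        for ( (cpu) = 0; (cpu) < NR_CPUS; (cpu)++ ) \
            if ( ((mask) >> (cpu)) & 1UL )

    /* Old Xen spelling and new, Linux-matching spelling: identical
     * expansion, so the hunks below are a pure rename with no
     * behavioural change. */
    #define for_each_cpu(cpu)          for_each_cpu_mask((cpu), cpu_possible_map)
    #define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)

    int main(void)
    {
        unsigned int cpu;

        for_each_possible_cpu ( cpu )  /* same loop as the old for_each_cpu(cpu) */
            printf("possible cpu %u\n", cpu);

        return 0;
    }
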
goto out;
/* XXX fmt */
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ctx[cpu] = pfm_context_create(&kreq);
if (ctx[cpu] == NULL) {
error = -ENOMEM;
BUG_ON(in_irq());
spin_lock(&xenpfm_context_lock);
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
if (per_cpu(xenpfm_context, cpu) != NULL) {
error = -EBUSY;
break;
}
}
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
per_cpu(xenpfm_context, cpu) = ctx[cpu];
ctx[cpu] = NULL;
}
spin_unlock(&xenpfm_context_lock);
out:
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
if (ctx[cpu] != NULL)
pfm_context_free(ctx[cpu]);
}
need_unload = 0;
BUG_ON(in_irq());
spin_lock_irqsave(&xenpfm_context_lock, flags);
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ctx = per_cpu(xenpfm_context, cpu);
if (ctx == NULL) {
error = -EINVAL;
need_unload = 1;
}
if (error) {
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ctx = per_cpu(xenpfm_context, cpu);
if (ctx == NULL)
break;
goto out;
}
if (need_unload) {
- for_each_cpu(cpu)
+ for_each_possible_cpu(cpu)
UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
spin_unlock_irqrestore(&xenpfm_context_lock, flags);
goto again;
}
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
per_cpu(xenpfm_context, cpu) = NULL;
arg.is_start = is_start;
atomic_set(&arg.started, 1); /* 1 for this cpu */
atomic_set(&arg.finished, 0);
- for_each_cpu(cpu)
+ for_each_possible_cpu(cpu)
arg.error[cpu] = 0;
BUG_ON(!spin_is_locked(&xenpfm_context_lock));
{
int i;
- for_each_cpu(i)
+ for_each_possible_cpu(i)
if (mt_info[i].valid && mt_info[i].socket_id ==
cpu_data(cpu)->socket_id)
mt_info[i].valid = 0;
{
int i;
- for_each_cpu(i)
+ for_each_possible_cpu(i)
if (!mt_info[i].valid)
return i;
int i;
__u32 sid = c->socket_id;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
&& mt_info[i].socket_id == sid) {
c->core_id = mt_info[i].core_id;
spin_lock(&sn2_ptcg_lock2);
node_set(cpu_to_node(smp_processor_id()), nodes_flushed);
i = 0;
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cnode = cpu_to_node(cpu);
if (!node_isset(cnode, nodes_flushed)) {
cpu_set(cpu, selected_cpus);
i++;
}
#else
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
{
unsigned int cpu;
- for_each_cpu ( cpu ) {
+ for_each_possible_cpu ( cpu ) {
perfc_t *perfcounters = per_cpu(perfcounters, cpu);
struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu);
int i, j;
{
unsigned int cpu;
- for_each_cpu ( cpu ) {
+ for_each_possible_cpu ( cpu ) {
struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu);
int i, j;
smp_prepare_cpus(max_cpus);
/* We aren't hotplug-capable yet. */
- for_each_cpu ( i )
- cpu_set(i, cpu_present_map);
+ cpus_or(cpu_present_map, cpu_present_map, cpu_possible_map);
/* Enable IRQ to receive IPI (needed for ITC sync). */
local_irq_enable();
legacy_hpet_event.flags = 0;
spin_lock_init(&legacy_hpet_event.lock);
- for_each_cpu(i)
+ for_each_possible_cpu(i)
per_cpu(cpu_bc_channel, i) = &legacy_hpet_event;
if ( !force_hpet_broadcast )
struct vcpu *v;
printk("CPU\tNMI\n");
- for_each_cpu ( i )
+ for_each_possible_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
* construct cpu_sibling_map, so that we can tell sibling CPUs
* efficiently.
*/
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpus_clear(per_cpu(cpu_core_map, cpu));
}
if ( opt_consistent_tscs )
{
int cpu;
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
}
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
per_cpu(perfcounters, cpu)[j] = 0;
case TYPE_S_SINGLE:
++j;
break;
case TYPE_ARRAY:
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
memset(per_cpu(perfcounters, cpu) + j, 0,
perfc_info[i].nr_elements * sizeof(perfc_t));
case TYPE_S_ARRAY:
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
++j;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
{
perfc_t *counters = per_cpu(perfcounters, cpu) + j;
unsigned int k;
open_softirq(SCHEDULE_SOFTIRQ, schedule);
- for_each_cpu ( i )
+ for_each_possible_cpu ( i )
{
spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
SET_HEAP_SIZE(&dummy_heap, 0);
SET_HEAP_LIMIT(&dummy_heap, 0);
- for_each_cpu ( i )
+ for_each_possible_cpu ( i )
{
spin_lock_init(&per_cpu(timers, i).lock);
per_cpu(timers, i).heap = &dummy_heap;
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
- for_each_cpu ( cpu )
+ for_each_possible_cpu ( cpu )
{
pi = alloc_domheap_pages(0,dstmem_order,0);
per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
*
* int any_online_cpu(mask) First online cpu in mask, or NR_CPUS
*
- * for_each_cpu(cpu) for-loop cpu over cpu_possible_map
+ * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
* for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
* for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
*
cpu; \
})
-#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
/* Copy to/from cpumap provided by control tools. */
struct xenctl_cpumap;
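
For reference, here is a rough standalone sketch of how the renamed and realigned wrappers relate to the possible, present and online maps documented in the comment hunk above. The cpumask_t type, cpu_isset() and the map values below are simplified stand-ins invented for the example, not Xen's definitions; only the shape of the wrappers mirrors the patch.

    #include <stdio.h>

    /* Simplified stand-ins invented for this example; Xen's cpumask_t is
     * a bitmap sized by NR_CPUS and the three maps are maintained by the
     * boot and hotplug code. */
    #define NR_CPUS 8
    typedef unsigned long cpumask_t;

    #define cpu_isset(cpu, mask)  ( ((mask) >> (cpu)) & 1UL )
    #define for_each_cpu_mask(cpu, mask) \
        for ( (cpu) = 0; (cpu) < NR_CPUS; (cpu)++ ) \
            if ( cpu_isset((cpu), (mask)) )

    /* Example values: eight CPUs could ever exist, four are physically
     * present, and two of those are currently online. */
    static const cpumask_t cpu_possible_map = 0xff;
    static const cpumask_t cpu_present_map  = 0x0f;
    static const cpumask_t cpu_online_map   = 0x05;

    /* The wrappers from the hunk above, over the stand-in maps. */
    #define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
    #define for_each_online_cpu(cpu)   for_each_cpu_mask((cpu), cpu_online_map)
    #define for_each_present_cpu(cpu)  for_each_cpu_mask((cpu), cpu_present_map)

    int main(void)
    {
        unsigned int cpu;

        printf("possible:");
        for_each_possible_cpu ( cpu )
            printf(" %u", cpu);

        printf("\npresent: ");
        for_each_present_cpu ( cpu )
            printf(" %u", cpu);

        printf("\nonline:  ");
        for_each_online_cpu ( cpu )
            printf(" %u", cpu);
        printf("\n");

        return 0;
    }

Hotplug-aware code allocates and initialises per-CPU state over the possible map (as the xenpfm, perfcounter, scheduler and timer hunks in this patch do), and restricts itself to the online map for work that requires the target CPU to actually be running.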